# import relevant modules
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from datetime import datetime, timedelta
import plotly.express as px
from plotly.subplots import make_subplots
from tqdm.notebook import tqdm
import plotly.graph_objects as go
import glob
import sys
sys.path.append('../scripts/')
from analysis import get_correlation, peak_analysis, peak_ranges
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Load the LDA topic assignment per hashtag; only these hashtags are
# considered relevant for the analysis below.
topics_df = pd.read_json('../../data/BTW17_Twitter/lda/hashtag_topics.json')
hashtags = topics_df['hashtag'].tolist()
# Load the daily hashtag count timeseries (its 'count' column is renamed later).
hashtag_df = pd.read_json('../../data/BTW17_Twitter/hashtags/hashtag_counts.json')
hashtag_df.head(3)
# Load politician metadata and keep only the columns needed below.
persons_df = pd.read_csv('../../data/BTW17_Suggestions/btw_politicians_demographic.csv')
persons_df.drop(columns=['Unnamed: 0', 'Born', 'Bundesland', 'Age'], inplace=True)
# Lower-case the names so they match the lower-cased query terms of the
# suggestion data; .str.lower() is the vectorized form of the former
# apply(lambda x: x.lower()).
persons_df['Name'] = persons_df['Name'].str.lower()
persons_df.rename(columns={'Name':'queryterm', 'Party':'party', 'Gender':'gender'}, inplace=True)
persons_df.head(3)
# Load the manually assigned category per suggestion cluster.
cluster_cat = pd.read_csv('../../data/BTW17_Suggestions/suggestions/cluster_categories.csv', delimiter=',')
cluster_cat.drop(columns='Unnamed: 0', inplace=True)
# 'sugg' holds a comma-separated list of suggestions; the number of
# separators + 1 gives the cluster size.
cluster_cat['size'] = cluster_cat['sugg'].apply(lambda x: x.count(', ')+1)
cluster_cat.head(3)
# load suggestions timeseries
tmp = pd.read_parquet('../../data/BTW17_Suggestions/processed/suggestions.parquet')
# Reduce the timestamps to calendar dates so rows can be aggregated per day.
tmp['date'] = pd.to_datetime(tmp['date']).dt.date
suggestions_df = pd.DataFrame()
# Count occurrences per (date, queryterm, suggestion).
# NOTE(review): groupby(...).count() yields one count column per remaining
# column; assigning into exactly four named columns assumes the parquet has
# exactly one non-key column — confirm against the file schema.
suggestions_df[['date', 'queryterm', 'suggestion', 'count']] = tmp.groupby(['date', 'queryterm', 'suggestion'], as_index=False).count()
# Attach party and gender of the queried politician.
suggestions_df = suggestions_df.merge(persons_df, how='left', on='queryterm')
# Load vector similarities (per suggestion: one score per relevant hashtag).
similarity_df = pd.read_json('../../data/BTW17_Suggestions/suggestions/vector_similarity.json')
# Attach the full hashtag list to every row; it is exploded further below.
# NOTE(review): all rows alias the same list object — fine as long as the
# list is never mutated per row.
similarity_df['hashtags'] = [hashtags for i in similarity_df.index]
# Suggestions are stored as token lists; join them into one string for merging.
similarity_df['suggestion'] = similarity_df['suggestion'].apply(lambda x: ' '.join(x))
# join suggestion cluster and group again
suggestions_df = suggestions_df.merge(similarity_df, how='inner', on='suggestion')
# NOTE(review): sum('count') passes 'count' as GroupBy.sum's numeric_only
# argument, not as a column selection — verify this aggregates as intended.
suggestions_df = suggestions_df.groupby(['date', 'queryterm', 'party', 'gender', 'cluster'], as_index=False).sum('count')
suggestions_df.head(3)
# Remodel: one row per (suggestion, cluster, hashtag) with its similarity score.
similarity_df = similarity_df.set_index(['suggestion', 'cluster']).apply(pd.Series.explode).reset_index()
similarity_df['similarity_scores'] = pd.to_numeric(similarity_df['similarity_scores'])
# Mean similarity score per (cluster, hashtag) pair across all suggestions.
similarity_df = similarity_df.groupby(['cluster', 'hashtags'], as_index=False).mean('similarity_scores')
similarity_df = similarity_df.merge(cluster_cat, how='left', on='cluster')
# Drop the noise category ('Rauschen').
similarity_df = similarity_df[similarity_df['category']!='Rauschen'].reset_index(drop=True)
similarity_df.head(3)
# Prepare the data for the time-lagged cross-correlation (TLCC):
# keep only cluster/hashtag pairs with a similarity score of at least 0.5.
sim_df = similarity_df[similarity_df['similarity_scores'] >= 0.5].reset_index(drop=True)
# Aggregate the suggestion counts on three levels: per cluster, per
# cluster and party, and per cluster and gender.
cluster_df = (suggestions_df
              .groupby(['date', 'cluster'], as_index=False)
              .sum('count')
              .rename(columns={'count': 'cluster_count'}))
cluster_party_df = (suggestions_df
                    .groupby(['date', 'party', 'cluster'], as_index=False)
                    .sum('count')
                    .rename(columns={'count': 'cluster_count'}))
cluster_gender_df = (suggestions_df
                     .groupby(['date', 'gender', 'cluster'], as_index=False)
                     .sum('count')
                     .rename(columns={'count': 'cluster_count'}))
# Align the hashtag timeseries column name with the cluster counts.
hashtag_df = hashtag_df.rename(columns={'count': 'hashtag_count'})
# Double the qualitative palette so enough colors exist for many traces.
# Build a new list: px.colors.qualitative.Antique is a module-level list,
# and the previous extend() mutated that shared palette in place, doubling
# it for every later use of px.colors.qualitative.Antique as well.
colors = px.colors.qualitative.Antique * 2
# Time lags to test: 0 to 70 days in weekly (7-day) steps.
delays = list(range(0, 71, 7))
# One lagged-correlation table per delay (dfs[i] belongs to delays[i]).
dfs = [get_correlation(d, hashtag_df, cluster_df, cluster_gender_df, cluster_party_df, sim_df)
       for d in delays]
# Persist one result file per delay so the expensive computation can be reloaded.
for delay, df in zip(delays, dfs):
    df.to_json(f'../../data/Analysis/df_{delay}_delays.json')
# set to *.json to load all
input_loc = '../../data/Analysis/*delays.json'
# glob returns files in arbitrary order, but the code further below pairs
# dfs[i] with delays[i] — sort by the delay encoded in the filename
# ('df_<delay>_delays.json') so both lists stay aligned.
input_files = sorted(glob.glob(input_loc), key=lambda f: int(f.split('_')[-2]))
dfs = []
for file in input_files:
    data = pd.read_json(file)
    data = data.merge(cluster_cat, how='left', on='cluster')
    #data = data[(data['pearsonr']>=0)&(data['p_value']<=0.05)&(data['gender']=='all')&(data['party']=='all')]
    # Keep only non-negative correlations; significance is checked later.
    data = data[(data['pearsonr']>=0)]
    dfs.append(data)
# Descriptive statistics of the relevant combinations (similarity >= 0.5).
print(f'Anzahl möglicher Kombinationen: {len(similarity_df[similarity_df["category"]!="Rauschen"])}')
print(f'Anzahl relevanter Kombinationen: {len(sim_df)}')
print(f'Anzahl Kombinationen pro Hashtag: {len(sim_df)/sim_df["hashtags"].nunique()}')
print(f'Anteil relevanter Kombinationen: {round(len(sim_df[sim_df["category"]!="Rauschen"])/len(similarity_df[similarity_df["category"]!="Rauschen"])*100,2)}%')
# Number of distinct (cluster, hashtag) combinations per category.
for category in sim_df['category'].unique():
    tmp = sim_df[sim_df['category']==category]
    print(f'Kategorie: {category}, Anzahl relevanter Kombinationen: {tmp.groupby(["cluster", "hashtags"], as_index=False).ngroups}')
# Mean similarity score per category (displayed in the notebook).
sim_df.groupby('category', as_index=False)['similarity_scores'].mean()
# Load the cluster assignment per suggestion and join the category labels.
cluster_cat_df = pd.read_json('../../data/BTW17_Suggestions/suggestions/cluster.json')
cluster_cat_df = cluster_cat_df.merge(cluster_cat, how='left', on='cluster')
# Cluster sizes (suggestions per cluster) per category for the box plot.
tmp = pd.DataFrame()
tmp['Cluster'] = cluster_cat_df['cluster'].value_counts().index
tmp['Clustergröße'] = cluster_cat_df['cluster'].value_counts().values
tmp = tmp.merge(cluster_cat, how='left', left_on='Cluster', right_on='cluster')
tmp = tmp[tmp['category']!='Rauschen']
# Number of distinct clusters per category, appended to the category label.
tmp2 = cluster_cat_df.groupby('category', as_index=False)['cluster'].nunique().sort_values(by='cluster', ascending=False)
tmp = tmp.merge(tmp2, on='category')
# 'cluster_y' is the nunique count from tmp2 (the merge suffixed the
# duplicate 'cluster' columns with _x/_y).
tmp['category'] = tmp.apply(lambda x: x['category'] + f' ({x["cluster_y"]} Cluster)', axis=1)
tmp.rename(columns={'category':'Kategorie'}, inplace=True)
fig = px.box(tmp[tmp['Kategorie']!='Rauschen'], x='Kategorie', y='Clustergröße',# points='all',
             color='Kategorie',
             template='simple_white', color_discrete_sequence=px.colors.qualitative.Antique)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
# t-SNE projection of the suggestions, colored by category.
fig = px.scatter(cluster_cat_df, x='t-SNE(x)', y='t-SNE(y)', color='category', hover_name='suggestion',
                 template='simple_white', color_discrete_sequence=px.colors.qualitative.Antique)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
# Regression analysis: which factors explain the mean similarity score?
reg_df = suggestions_df.groupby(['party', 'gender', 'cluster'], as_index=False).sum()
# Mean similarity score per cluster (category kept as a grouping key).
tmp = similarity_df.groupby(['cluster', 'category'], as_index=False).mean()
reg_df = reg_df.merge(tmp, how='left', on='cluster')
# Drop clusters without a matching category/score (e.g. filtered 'Rauschen').
reg_df.dropna(inplace=True)
reg_df = reg_df.reset_index(drop=True)
reg_df.head(3)
Keine Auswirkung auf den Similarity Score erkennbar
# OLS: cluster size as predictor of the mean similarity score.
reg = smf.ols('similarity_scores ~ size', data=reg_df).fit()
# Joint F-test over all coefficients (identity restriction matrix).
print(reg.f_test(np.eye(len(reg.params))))
print(reg.rsquared)
reg.summary().tables[1]
Keine Auswirkung auf den Similarity Score erkennbar
# OLS: party (categorical) as predictor of the mean similarity score.
reg = smf.ols('similarity_scores ~ C(party)', data=reg_df).fit()
# Joint F-test over all coefficients (identity restriction matrix).
print(reg.f_test(np.eye(len(reg.params))))
print(reg.rsquared)
reg.summary().tables[1]
Keine Auswirkung auf den Similarity Score erkennbar
# OLS: gender (categorical) as predictor of the mean similarity score.
reg = smf.ols('similarity_scores ~ C(gender)', data=reg_df).fit()
# Joint F-test over all coefficients (identity restriction matrix).
print(reg.f_test(np.eye(len(reg.params))))
print(reg.rsquared)
reg.summary().tables[1]
Auswirkungen erkennbar — genaue Interpretation noch zu klären (siehe F-Test und R² unten)
# OLS: topic category (categorical) as predictor of the mean similarity score.
reg = smf.ols('similarity_scores ~ C(category)', data=reg_df).fit()
# Joint F-test over all coefficients (identity restriction matrix).
print(reg.f_test(np.eye(len(reg.params))))
print(reg.rsquared)
reg.summary().tables[1]
Keine Auswirkung erkennbar
# OLS: suggestion count as predictor of the mean similarity score.
reg = smf.ols('similarity_scores ~ count', data=reg_df).fit()
# Joint F-test over all coefficients (identity restriction matrix).
print(reg.f_test(np.eye(len(reg.params))))
print(reg.rsquared)
reg.summary().tables[1]
Fragestellung: Wie lange dauert die Durchdringung im Durchschnitt und nach den jeweiligen Dimensionen? Messung: TLCC mit Pearson R und p-Wert
# Overview table: mean correlation and combined p-value per time lag,
# restricted to the overall view (gender == party == 'all').
rows = []
for lag_days, df_lag in zip(delays, dfs):
    overall = df_lag[(df_lag['gender'] == 'all') & (df_lag['party'] == 'all')]
    combined_p = stats.combine_pvalues(overall['p_value'].to_numpy())[1]
    rows.append({'Time Lag (in Wochen)': int(lag_days / 7),
                 'Pearson R': round(overall['pearsonr'].mean(), 3),
                 'P-Wert': round(combined_p, 3)})
tmp = pd.DataFrame(data=rows)
tmp
Sämtliche Korrelationen sind signifikant (p<0.05), deshalb Betrachtung im Plot.
# Mean correlation per time lag as a line plot.
fig = px.line(tmp, x='Time Lag (in Wochen)', y='Pearson R',
              template='simple_white', color_discrete_sequence=px.colors.qualitative.Antique)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
Plateau zwischen 6-9 Wochen, allerdings sehr geringe Korrelation. Ausnahmen bei 9 Wochen sind nur wenige:
# Closer look at the 9-week lag (dfs[9] == 63 days given the weekly delays):
# combinations with a strong correlation (>= 0.5) in the overall view.
tmp = dfs[9][(dfs[9]['gender']=='all')&(dfs[9]['party']=='all')]
tmp = tmp[tmp['pearsonr']>=0.5]
tmp.sort_values(by='pearsonr', ascending=False)[['cluster', 'hashtags', 'category_x', 'pearsonr', 'similarity_scores']]
# Per-category TLCC: mean correlation and combined p-value per time lag.
delay_list = []
categories = []
r_list = []
p_list = []
for i in range(len(dfs)):
    for category in set(similarity_df['category']):
        delay_list.append(delays[i])
        # Overall view only (no gender/party breakdown).
        df = dfs[i][(dfs[i]['gender']=='all')&(dfs[i]['party']=='all')]
        categories.append(category)
        # 'category_x' stems from the merge with cluster_cat (both sides
        # carry a 'category' column, hence the _x/_y suffixes).
        r_list.append(df[df['category_x']==category]['pearsonr'].mean())
        p_values = df[df['category_x']==category]['p_value'].to_numpy()
        # Combine the per-combination p-values (Fisher's method, scipy default).
        p_list.append(stats.combine_pvalues(p_values)[1])
tmp = pd.DataFrame(data={'Delay': delay_list, 'Kategorie': categories, 'Pearson R': r_list, 'P-Wert': p_list})
tmp = tmp.dropna()
tmp = tmp[tmp['Kategorie']!='Rauschen']
# Side-by-side heatmaps: correlations (left) and combined p-values (right).
fig = make_subplots(rows=1, cols=2, subplot_titles=('Pearson R', 'P-Werte'),
                    shared_yaxes=True, horizontal_spacing=0.15)
fig.add_trace(go.Heatmap(z=tmp['Pearson R'], x=tmp['Kategorie'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu, colorbar_x=0.45), row=1, col=1)
fig.add_trace(go.Heatmap(z=tmp['P-Wert'], x=tmp['Kategorie'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu_r), row=1, col=2)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
Höchste signifikante Korrelation im Bereich zwischen 6-9 Wochen. Insbesondere Personen und Wirtschaft. Bestätigung der Ergebnisse der Gesamtbetrachtung.
# Per-gender TLCC: mean correlation and combined p-value per time lag.
delay_list = []
gender_list = []
r_list = []
p_list = []
for i in range(len(dfs)):
    for gender in set(suggestions_df['gender']):
        delay_list.append(delays[i])
        # Gender breakdown only (party stays 'all'), noise category excluded.
        df = dfs[i][(dfs[i]['gender']!='all')&(dfs[i]['party']=='all')]
        gender_list.append(gender)
        df = df[df['category_x']!='Rauschen']
        r_list.append(df[df['gender']==gender]['pearsonr'].mean())
        p_values = df[df['gender']==gender]['p_value'].to_numpy()
        # Combine the per-combination p-values (Fisher's method, scipy default).
        p_list.append(stats.combine_pvalues(p_values)[1])
tmp = pd.DataFrame(data={'Delay': delay_list, 'Gender': gender_list, 'Pearson R': r_list, 'P-Wert': p_list})
tmp = tmp.dropna()
# Side-by-side heatmaps: correlations (left) and combined p-values (right).
fig = make_subplots(rows=1, cols=2, subplot_titles=('Pearson R', 'P-Werte'),
                    shared_yaxes=True, horizontal_spacing=0.15)
fig.add_trace(go.Heatmap(z=tmp['Pearson R'], x=tmp['Gender'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu, colorbar_x=0.45), row=1, col=1)
fig.add_trace(go.Heatmap(z=tmp['P-Wert'], x=tmp['Gender'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu_r), row=1, col=2)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
Bestätigung der Ergebnisse aus der Gesamtbetrachtung.
# Per-party TLCC: mean correlation and combined p-value per time lag.
delay_list = []
party_list = []
r_list = []
p_list = []
for i in range(len(dfs)):
    for party in set(suggestions_df['party']):
        delay_list.append(delays[i])
        # Party breakdown only (gender stays 'all'), noise category excluded.
        df = dfs[i][(dfs[i]['gender']=='all')&(dfs[i]['party']!='all')]
        party_list.append(party)
        df = df[df['category_x']!='Rauschen']
        r_list.append(df[df['party']==party]['pearsonr'].mean())
        p_values = df[df['party']==party]['p_value'].to_numpy()
        # Combine the per-combination p-values (Fisher's method, scipy default).
        p_list.append(stats.combine_pvalues(p_values)[1])
tmp = pd.DataFrame(data={'Delay': delay_list, 'Parteien': party_list, 'Pearson R': r_list, 'P-Wert': p_list})
tmp = tmp.dropna()
# Side-by-side heatmaps: correlations (left) and combined p-values (right).
fig = make_subplots(rows=1, cols=2, subplot_titles=('Pearson R', 'P-Werte'),
                    shared_yaxes=True, horizontal_spacing=0.15)
fig.add_trace(go.Heatmap(z=tmp['Pearson R'], x=tmp['Parteien'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu, colorbar_x=0.45), row=1, col=1)
fig.add_trace(go.Heatmap(z=tmp['P-Wert'], x=tmp['Parteien'],
                         y=tmp['Delay'],
                         colorscale=px.colors.sequential.RdBu_r), row=1, col=2)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()
Grobe Bestätigung der Ergebnisse der Gesamtbetrachtung. Ausnahme AfD: immer stark korreliert — ggf. eine Partei mit besonders starker Abhängigkeit von Twitter?
# Peak analysis: load the detected peak dates per hashtag.
peaks_df = pd.read_json('../../data/BTW17_Twitter/peaks/peak_dates.json')
# NOTE(review): number of peaks = len(lda_dates) / 7 — presumably each peak
# spans a 7-day window; confirm against the peak-detection step.
peaks_df['num_peaks'] = peaks_df.apply(lambda x: len(x['lda_dates']) / 7, axis=1)
# peak_ranges (scripts/analysis.py) turns the date lists into start/end pairs.
peaks_df[['peak_start', 'peak_end']] = peaks_df.apply(peak_ranges, axis=1)
peaks_df.drop(columns=['index', 'num_peaks', 'lda_dates'], inplace=True)
# Explode to one row per (hashtag, peak range).
peaks_df = peaks_df.set_index(['hashtag']).apply(pd.Series.explode).reset_index()
peaks_df.head(3)
# Daily suggestion counts aggregated per cluster.
cluster_ts_df = suggestions_df.groupby(['date', 'cluster'], as_index=False).sum('count')
cluster_ts_df.head(3)
analysis_dfs = []
# Run the peak analysis for every non-zero delay; delays[0] == 0 is skipped,
# so analysis_dfs[i] corresponds to test range delays[i+1].
for i in tqdm(range(len(delays[1:]))):
    test_range = delays[i+1]
    tmp = pd.DataFrame(data=peak_analysis(test_range, sim_df, peaks_df, cluster_ts_df))
    # Show the aggregate result over all hashtags and categories.
    print(tmp[(tmp['hashtag']=='all')&(tmp['category']=='all')])
    analysis_dfs.append(tmp)
Keine signifikanten Ergebnisse über alle Kategorien. Erwartbar, dass sich Themen unterschiedlicher Kategorien unterschiedlich verhalten.
# save files
# analysis_dfs[i] was computed with test_range == delays[i+1] (the zero
# delay is skipped above), so label each file with delays[i+1]; the former
# delays[i] index shifted every filename by one week.
for i in range(len(analysis_dfs)):
    analysis_dfs[i].to_json(f'../../data/Analysis/peak_df_{delays[i+1]}_range.json')
# set to *.json to load all
input_loc = '../../data/Analysis/*range.json'
input_files = glob.glob(input_loc)
# Load order does not matter here: each table carries its own 'test_range'.
analysis_dfs = []
for file in input_files:
    data = pd.read_json(file)
    analysis_dfs.append(data)
# prepare plots
categories = cluster_cat['category'].unique().tolist()
plot_df = {'category':[], 'test_range':[], 't':[], 'p':[]}
for category in categories:
    for i in range(len(analysis_dfs)):
        tmp = analysis_dfs[i][analysis_dfs[i]['category']==category]
        # A category can be absent for a given range; converting a
        # non-size-1 array raises, which marks the row as skippable.
        # Convert everything BEFORE appending so a failure cannot leave the
        # four parallel lists out of sync (the old code appended as it went),
        # and catch only the expected conversion errors instead of a bare except.
        try:
            test_range = int(tmp['test_range'].values)
            t_val = float(tmp['t'].values)
            p_val = float(tmp['p'].values)
        except (TypeError, ValueError):
            continue
        plot_df['test_range'].append(test_range)
        plot_df['t'].append(t_val)
        plot_df['p'].append(p_val)
        plot_df['category'].append(category)
plot_df = pd.DataFrame(data=plot_df)
# filter plot to significant values
plot_df = plot_df[plot_df['p']<=0.05]
# Convert the range from days to weeks for the axis labels.
plot_df['test_range'] = plot_df['test_range'] / 7
# One bar panel per category on a 2x4 grid with shared axes: significant
# t-values over the test range (in weeks).
fig = make_subplots(rows=2, cols=4, shared_yaxes='all', shared_xaxes='all',
                    subplot_titles=['Berufe', 'Medizin', 'Organisationen',
                                    'Orte', 'Personen', 'Politik', 'Wirtschaft'])
# (category, palette index, subplot row, subplot column) — same trace order
# and colors as before, just driven by data instead of copy-paste.
panels = [('Berufe', 0, 1, 1),
          ('Medizin', 3, 1, 2),
          ('Organisationen', 4, 1, 3),
          ('Orte', 5, 1, 4),
          ('Personen', 6, 2, 1),
          ('Politik', 7, 2, 2),
          ('Wirtschaft', 10, 2, 3)]
for cat_name, color_idx, grid_row, grid_col in panels:
    subset = plot_df[plot_df['category'] == cat_name]
    fig.add_trace(go.Bar(x=subset['test_range'],
                         y=subset['t'],
                         name=cat_name,
                         marker_color=px.colors.qualitative.Antique[color_idx]),
                  row=grid_row, col=grid_col)
fig.update_yaxes(title='t', col=1)
fig.update_xaxes(title='Range (in Wochen)', row=2)
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15),
                  template='simple_white', showlegend=False)
fig.show()
# Relate the t-values to the (min-max normalized) popularity per category.
tmp_dict = {'category': [], 'mean': [], 'std': []}
# Restrict to clusters that appear in the relevant similarity pairs.
tmp = cluster_df[cluster_df['cluster'].isin(sim_df['cluster'].unique())]
tmp = tmp.merge(cluster_cat, how='left', on='cluster')
for category in tmp['category'].unique():
    tmp2 = tmp[tmp['category']==category]
    tmp2 = tmp2.groupby('date', as_index=False).mean('cluster_count')
    # Min-max normalization of the daily mean counts to [0, 1].
    tmp2['cluster_count'] = (tmp2['cluster_count'] - tmp2['cluster_count'].min())/ (tmp2['cluster_count'].max() - tmp2['cluster_count'].min())
    tmp_dict['category'].append(category)
    tmp_dict['mean'].append(tmp2["cluster_count"].mean())
    tmp_dict['std'].append(tmp2["cluster_count"].std())
merge_df = pd.DataFrame(data=tmp_dict)
plot_df = plot_df.merge(merge_df, how='left', on='category')
# Mean popularity statistics per category (displayed in the notebook).
plot_df.groupby('category', as_index=False).mean()[['category', 'mean', 'std']]
# Correlation matrix between the t-values and the popularity mean/std.
fig = px.imshow(plot_df[['t', 'mean', 'std']].corr(), color_continuous_scale='RdBu')
fig.update_layout(font=dict(family='Computer Modern', color='black', size=15))
fig.show()